svm: Handle MC4_MISC threshold register for guests
Author: kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Thu, 24 May 2007 12:30:29 +0000 (13:30 +0100)
Committer: kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Thu, 24 May 2007 12:30:29 +0000 (13:30 +0100)
The threshold register was introduced in AMD RevF CPUs along with
SVM. (Strictly speaking, this MCA/MCE MSR existed before, but it had
no defined meaning.) Therefore no additional CPUID checks are needed.

On read access, it reports to the HVM guest that the register has been
locked by the BIOS. This means it is not available for OS use, so
write accesses are simply ignored. This behaviour matches real HW,
so guests can deal with it.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
xen/arch/x86/hvm/svm/svm.c
xen/include/asm-x86/msr.h

index ad847564d8bd04320e46c2405392c2742f77ad7c..f7c6efb3ec9432e7c6bed1d01298df4384835714 100644 (file)
@@ -179,6 +179,14 @@ static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
 
         break;
 
+    case MSR_K8_MC4_MISC: /* Threshold register */
+        /*
+         * MCA/MCE: Threshold register is reported to be locked, so we ignore
+         * all write accesses. This behaviour matches real HW, so guests should
+         * have no problem with this.
+         */
+        break;
+
     default:
         return 0;
     }
@@ -2062,6 +2070,14 @@ static inline void svm_do_msr_access(
             msr_content = v->arch.hvm_svm.cpu_shadow_efer;
             break;
 
+        case MSR_K8_MC4_MISC: /* Threshold register */
+            /*
+             * MCA/MCE: We report that the threshold register is unavailable
+             * for OS use (locked by the BIOS).
+             */
+            msr_content = 1ULL << 61; /* MC4_MISC.Locked */
+            break;
+
         default:
             if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
                  rdmsr_safe(ecx, eax, edx) == 0 )
index d1ea050b6826386376b4b22aa170b76fc59c50c9..3ed5265e6ec2e88dfe09997867debb8c5f6c02b8 100644 (file)
@@ -217,6 +217,27 @@ static inline void write_efer(__u64 val)
 #define MSR_IA32_MC0_ADDR              0x402
 #define MSR_IA32_MC0_MISC              0x403
 
+/* K8 Machine Check MSRs */
+#define MSR_K8_MC1_CTL                 0x404
+#define MSR_K8_MC1_STATUS              0x405
+#define MSR_K8_MC1_ADDR                        0x406
+#define MSR_K8_MC1_MISC                        0x407
+
+#define MSR_K8_MC2_CTL                 0x408
+#define MSR_K8_MC2_STATUS              0x409
+#define MSR_K8_MC2_ADDR                        0x40A
+#define MSR_K8_MC2_MISC                        0x40B
+
+#define MSR_K8_MC3_CTL                 0x40C
+#define MSR_K8_MC3_STATUS              0x40D
+#define MSR_K8_MC3_ADDR                        0x40E
+#define MSR_K8_MC3_MISC                        0x40F
+
+#define MSR_K8_MC4_CTL                 0x410
+#define MSR_K8_MC4_STATUS              0x411
+#define MSR_K8_MC4_ADDR                        0x412
+#define MSR_K8_MC4_MISC                        0x413
+
 /* Pentium IV performance counter MSRs */
 #define MSR_P4_BPU_PERFCTR0            0x300
 #define MSR_P4_BPU_PERFCTR1            0x301